 void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v)
 {
-    if ( !is_hvm_domain(d) || !atomic_read(&v->vm_event_pause_count) )
+    if ( !is_hvm_domain(d) )
         return;
+    ASSERT(atomic_read(&v->vm_event_pause_count));
+
     hvm_toggle_singlestep(v);
 }
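The assertion is safe because singlestep toggling is only reachable from a response to a synchronous, vCPU-pausing event. A minimal consumer-side sketch of such a response, loosely modeled on tools/tests/xen-access and assuming a request req just read off the ring (put_response() is a hypothetical stand-in for the usual ring-buffer helper):

    vm_event_response_t rsp = {
        .version = VM_EVENT_INTERFACE_VERSION,
        .vcpu_id = req.vcpu_id,
        .flags   = req.flags & VM_EVENT_FLAG_VCPU_PAUSED, /* echo pause state */
    };

    rsp.flags |= VM_EVENT_FLAG_TOGGLE_SINGLESTEP; /* applied while still paused */
    put_response(ring, &rsp);                     /* hypothetical ring helper */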
         ASSERT(w);
+        /* deny flag requires the vCPU to be paused */
+        if ( !atomic_read(&v->vm_event_pause_count) )
+            return;
+
         switch ( rsp->reason )
         {
         case VM_EVENT_REASON_MOV_TO_MSR:
 void vm_event_set_registers(struct vcpu *v, vm_event_response_t *rsp)
 {
+    ASSERT(atomic_read(&v->vm_event_pause_count));
+
     v->arch.user_regs.eax = rsp->data.regs.x86.rax;
     v->arch.user_regs.ebx = rsp->data.regs.x86.rbx;
     v->arch.user_regs.ecx = rsp->data.regs.x86.rcx;
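Consumers drive this path by setting VM_EVENT_FLAG_SET_REGISTERS on the response and filling in data.regs.x86; a hedged sketch, reusing the rsp/req pair from the sketch above:

    rsp.flags |= VM_EVENT_FLAG_SET_REGISTERS;
    rsp.data.regs.x86 = req.data.regs.x86;  /* start from the reported state */
    rsp.data.regs.x86.rax = 0;              /* e.g. forge a return value */

With the new ASSERT, such a response is only honoured while the vCPU is still paused for the event.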
         if ( rsp.flags & VM_EVENT_FLAG_ALTERNATE_P2M )
             p2m_altp2m_check(v, rsp.altp2m_idx);
-        if ( rsp.flags & VM_EVENT_FLAG_VCPU_PAUSED )
+        /* Check flags which apply only when the vCPU is paused */
+        if ( atomic_read(&v->vm_event_pause_count) )
         {
             if ( rsp.flags & VM_EVENT_FLAG_SET_REGISTERS )
                 vm_event_set_registers(v, &rsp);
             if ( rsp.flags & VM_EVENT_FLAG_TOGGLE_SINGLESTEP )
                 vm_event_toggle_singlestep(d, v);
-            vm_event_vcpu_unpause(v);
+            if ( rsp.flags & VM_EVENT_FLAG_VCPU_PAUSED )
+                vm_event_vcpu_unpause(v);
         }
     }
 }
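Since SET_REGISTERS and TOGGLE_SINGLESTEP are now applied whenever the vCPU is paused, and the unpause itself is gated on VM_EVENT_FLAG_VCPU_PAUSED, a consumer can combine all three in one response; a sketch, again on the rsp from above:

    rsp.flags |= VM_EVENT_FLAG_SET_REGISTERS
               | VM_EVENT_FLAG_TOGGLE_SINGLESTEP
               | VM_EVENT_FLAG_VCPU_PAUSED;  /* the unpause happens last */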
 /*
  * Deny completion of the operation that triggered the event.
  * Currently only useful for MSR, CR0, CR3 and CR4 write events.
+ * Requires the vCPU to be paused already (synchronous events only).
  */
 #define VM_EVENT_FLAG_DENY (1 << 6)
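A hedged sketch of the flag in use, denying a monitored MSR write from the same consumer loop as the sketches above:

    if ( req.reason == VM_EVENT_REASON_MOV_TO_MSR )
        rsp.flags |= VM_EVENT_FLAG_DENY;  /* honoured only while the vCPU is paused */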
/*